, existinghooks :: M.Map Git.Hook.Hook Bool
, workers :: Maybe (TMVar (WorkerPool (AnnexState, AnnexRead)))
, cachedcurrentbranch :: (Maybe (Maybe Git.Branch, Maybe Adjustment))
- , cachedgitenv :: Maybe (AltIndexFile, FilePath, [(String, String)])
+ , cachedgitenv :: Maybe (AltIndexFile, OsPath, [(String, String)])
, urloptions :: Maybe UrlOptions
, insmudgecleanfilter :: Bool
, getvectorclock :: IO CandidateVectorClock
import Data.ByteString.Builder
import Control.Concurrent (threadDelay)
import Control.Concurrent.MVar
-import qualified System.FilePath.ByteString as P
import System.PosixCompat.Files (isRegularFile)
import Annex.Common
branchFiles = withIndex $ inRepo branchFiles'
branchFiles' :: Git.Repo -> IO ([OsPath], IO Bool)
-branchFiles' = Git.Command.pipeNullSplit' $
+branchFiles' = Git.Command.pipeNullSplit'' toOsPath $
lsTreeParams Git.LsTree.LsTreeRecursive (Git.LsTree.LsTreeLong False)
fullname
[Param "--name-only"]
prepareModifyIndex :: JournalLocked -> Annex ()
prepareModifyIndex _jl = do
index <- fromRepo gitAnnexIndex
- void $ liftIO $ tryIO $ R.removeLink (index <> ".lock")
+ void $ liftIO $ tryIO $
+ removeFile (index <> literalOsPath ".lock")
{- Runs an action using the branch's index file. -}
withIndex :: Annex a -> Annex a
withIndex' bootstrapping a = withIndexFile AnnexIndexFile $ \f -> do
checkIndexOnce $ unlessM (liftIO $ doesFileExist f) $ do
unless bootstrapping create
- createAnnexDirectory $ toOsPath $ takeDirectory f
+ createAnnexDirectory $ takeDirectory f
unless bootstrapping $ inRepo genIndex
a
{- Checks if the index needs to be updated. -}
needUpdateIndex :: Git.Ref -> Annex Bool
needUpdateIndex branchref = do
- f <- toOsPath <$> fromRepo gitAnnexIndexStatus
+ f <- fromRepo gitAnnexIndexStatus
committedref <- Git.Ref . firstLine' <$>
liftIO (catchDefaultIO mempty $ F.readFile' f)
return (committedref /= branchref)
Git.UpdateIndex.streamUpdateIndex g
[genstream dir h jh jlogh]
commitindex
- liftIO $ cleanup (fromOsPath dir) jlogh jlogf
+ liftIO $ cleanup dir jlogh jlogf
where
genstream dir h jh jlogh streamer = readDirectory jh >>= \case
Nothing -> return ()
Just file -> do
- let path = dir P.</> file
- unless (dirCruft file) $ whenM (isfile path) $ do
+ let file' = toOsPath file
+ let path = dir </> file'
+ unless (file' `elem` dirCruft) $ whenM (isfile path) $ do
sha <- Git.HashObject.hashFile h path
B.hPutStr jlogh (file <> "\n")
streamer $ Git.UpdateIndex.updateIndexLine
- sha TreeFile (asTopFilePath $ fileJournal file)
+ sha TreeFile (asTopFilePath $ fileJournal file')
genstream dir h jh jlogh streamer
- isfile file = isRegularFile <$> R.getFileStatus file
+ isfile file = isRegularFile <$> R.getFileStatus (fromOsPath file)
-- Clean up the staged files, as listed in the temp log file.
-- The temp file is used to avoid needing to buffer all the
-- filenames in memory.
hFlush jlogh
hSeek jlogh AbsoluteSeek 0
stagedfs <- lines <$> hGetContents jlogh
- mapM_ (removeFile . (dir </>)) stagedfs
+ mapM_ (removeFile . (dir </>) . toOsPath) stagedfs
hClose jlogh
removeWhenExistsWith (R.removeLink) (fromOsPath jlogf)
- openjlog tmpdir = liftIO $ openTmpFileIn (toOsPath tmpdir) (toOsPath "jlog")
+ openjlog tmpdir = liftIO $ openTmpFileIn tmpdir (literalOsPath "jlog")
getLocalTransitions :: Annex Transitions
getLocalTransitions =
S.fromList . mapMaybe Git.Sha.extractSha . fileLines' <$> content
where
content = do
- f <- toOsPath <$> fromRepo gitAnnexIgnoredRefs
+ f <- fromRepo gitAnnexIgnoredRefs
liftIO $ catchDefaultIO mempty $ F.readFile' f
addMergedRefs :: [(Git.Sha, Git.Branch)] -> Annex ()
getMergedRefs' :: Annex [(Git.Sha, Git.Branch)]
getMergedRefs' = do
- f <- toOsPath <$> fromRepo gitAnnexMergedRefs
+ f <- fromRepo gitAnnexMergedRefs
s <- liftIO $ catchDefaultIO mempty $ F.readFile' f
return $ map parse $ fileLines' s
where
import Annex.Perms
#endif
-import qualified System.FilePath.ByteString as P
-
{- Checks if a given key's content is currently present. -}
inAnnex :: Key -> Annex Bool
-inAnnex key = inAnnexCheck key $ liftIO . R.doesPathExist
+inAnnex key = inAnnexCheck key $ liftIO . R.doesPathExist . fromOsPath
{- Runs an arbitrary check on a key's content. -}
-inAnnexCheck :: Key -> (RawFilePath -> Annex Bool) -> Annex Bool
+inAnnexCheck :: Key -> (OsPath -> Annex Bool) -> Annex Bool
inAnnexCheck key check = inAnnex' id False check key
{- inAnnex that performs an arbitrary check of the key's content. -}
-inAnnex' :: (a -> Bool) -> a -> (RawFilePath -> Annex a) -> Key -> Annex a
+inAnnex' :: (a -> Bool) -> a -> (OsPath -> Annex a) -> Key -> Annex a
inAnnex' isgood bad check key = withObjectLoc key $ \loc -> do
r <- check loc
if isgood r
objectFileExists :: Key -> Annex Bool
objectFileExists key =
calcRepo (gitAnnexLocation key)
- >>= liftIO . R.doesPathExist
+ >>= liftIO . doesFileExist
{- A safer check; the key's content must not only be present, but
- is not in the process of being removed. -}
{- The content file must exist, but the lock file generally
- won't exist unless a removal is in process. -}
checklock (Just lockfile) contentfile =
- ifM (liftIO $ doesFileExist (fromRawFilePath contentfile))
+ ifM (liftIO $ doesFileExist contentfile)
( checkOr is_unlocked lockfile
, return is_missing
)
Just True -> is_locked
Just False -> is_unlocked
#else
- checklock Nothing contentfile = liftIO $ ifM (doesFileExist (fromRawFilePath contentfile))
+ checklock Nothing contentfile = liftIO $ ifM (doesFileExist contentfile)
( lockShared contentfile >>= \case
Nothing -> return is_locked
Just lockhandle -> do
{- In Windows, see if we can take a shared lock. If so,
- remove the lock file to clean up after ourselves. -}
checklock (Just lockfile) contentfile =
- ifM (liftIO $ doesFileExist (fromRawFilePath contentfile))
+ ifM (liftIO $ doesFileExist contentfile)
( modifyContentDir lockfile $ liftIO $
lockShared lockfile >>= \case
Nothing -> return is_locked
- content locking works, from running at the same time as content is locked
- using the old method.
-}
-withContentLockFile :: Key -> (Maybe RawFilePath -> Annex a) -> Annex a
+withContentLockFile :: Key -> (Maybe OsPath -> Annex a) -> Annex a
withContentLockFile k a = do
v <- getVersion
if versionNeedsWritableContentFiles v
- will switch over to v10 content lock files at the
- right time. -}
gitdir <- fromRepo Git.localGitDir
- let gitconfig = gitdir P.</> "config"
+ let gitconfig = gitdir </> literalOsPath "config"
ic <- withTSDelta (liftIO . genInodeCache gitconfig)
oldic <- Annex.getState Annex.gitconfiginodecache
v' <- if fromMaybe False (compareStrong <$> ic <*> oldic)
where
go v = contentLockFile k v >>= a
-contentLockFile :: Key -> Maybe RepoVersion -> Annex (Maybe RawFilePath)
+contentLockFile :: Key -> Maybe RepoVersion -> Annex (Maybe OsPath)
#ifndef mingw32_HOST_OS
{- Older versions of git-annex locked content files themselves, but newer
- versions use a separate lock file, to better support repos shared
#endif
{- Performs an action, passing it the location to use for a key's content. -}
-withObjectLoc :: Key -> (RawFilePath -> Annex a) -> Annex a
+withObjectLoc :: Key -> (OsPath -> Annex a) -> Annex a
withObjectLoc key a = a =<< calcRepo (gitAnnexLocation key)
{- Check if a file contains the unmodified content of the key.
- The expensive way to tell is to do a verification of its content.
- The cheaper way is to see if the InodeCache for the key matches the
- file. -}
-isUnmodified :: Key -> RawFilePath -> Annex Bool
+isUnmodified :: Key -> OsPath -> Annex Bool
isUnmodified key f =
withTSDelta (liftIO . genInodeCache f) >>= \case
Just fc -> do
isUnmodified' key f fc ic
Nothing -> return False
-isUnmodified' :: Key -> RawFilePath -> InodeCache -> [InodeCache] -> Annex Bool
+isUnmodified' :: Key -> OsPath -> InodeCache -> [InodeCache] -> Annex Bool
isUnmodified' = isUnmodifiedLowLevel Database.Keys.addInodeCaches
{- Cheap check if a file contains the unmodified content of the key,
- this may report a false positive when repeated edits are made to a file
- within a small time window (eg 1 second).
-}
-isUnmodifiedCheap :: Key -> RawFilePath -> Annex Bool
+isUnmodifiedCheap :: Key -> OsPath -> Annex Bool
isUnmodifiedCheap key f = maybe (pure False) (isUnmodifiedCheap' key)
=<< withTSDelta (liftIO . genInodeCache f)
import Annex.InodeSentinal
import Utility.InodeCache
-isUnmodifiedLowLevel :: (Key -> [InodeCache] -> Annex ()) -> Key -> RawFilePath -> InodeCache -> [InodeCache] -> Annex Bool
+isUnmodifiedLowLevel :: (Key -> [InodeCache] -> Annex ()) -> Key -> OsPath -> InodeCache -> [InodeCache] -> Annex Bool
isUnmodifiedLowLevel addinodecaches key f fc ic =
isUnmodifiedCheapLowLevel fc ic <||> expensivecheck
where
import Config.Smudge
{- Runs an action using a different git index file. -}
-withIndexFile :: AltIndexFile -> (FilePath -> Annex a) -> Annex a
+withIndexFile :: AltIndexFile -> (OsPath -> Annex a) -> Annex a
withIndexFile i = withAltRepo usecachedgitenv restoregitenv
where
-- This is an optimisation. Since withIndexFile is run repeatedly,
f <- indexEnvVal $ case i of
AnnexIndexFile -> gitAnnexIndex g
ViewIndexFile -> gitAnnexViewIndex g
- g' <- addGitEnv g indexEnv f
+ g' <- addGitEnv g indexEnv (fromOsPath f)
return (g', f)
restoregitenv g g' = g' { gitEnv = gitEnv g }
type LinkTarget = S.ByteString
{- Checks if a file is a link to a key. -}
-isAnnexLink :: RawFilePath -> Annex (Maybe Key)
+isAnnexLink :: OsPath -> Annex (Maybe Key)
isAnnexLink file = maybe Nothing parseLinkTargetOrPointer <$> getAnnexLinkTarget file
{- Gets the link target of a symlink.
- Returns Nothing if the file is not a symlink, or not a link to annex
- content.
-}
-getAnnexLinkTarget :: RawFilePath -> Annex (Maybe LinkTarget)
+getAnnexLinkTarget :: OsPath -> Annex (Maybe LinkTarget)
getAnnexLinkTarget f = getAnnexLinkTarget' f
=<< (coreSymlinks <$> Annex.getGitConfig)
{- Pass False to force looking inside file, for when git checks out
- symlinks as plain files. -}
-getAnnexLinkTarget' :: RawFilePath -> Bool -> Annex (Maybe S.ByteString)
+getAnnexLinkTarget' :: OsPath -> Bool -> Annex (Maybe S.ByteString)
getAnnexLinkTarget' file coresymlinks = if coresymlinks
then check probesymlink $
return Nothing
| otherwise -> return Nothing
Nothing -> fallback
- probesymlink = R.readSymbolicLink file
+ probesymlink = R.readSymbolicLink (fromOsPath file)
- probefilecontent = F.withFile (toOsPath file) ReadMode $ \h -> do
+ probefilecontent = F.withFile file ReadMode $ \h -> do
s <- S.hGet h maxSymlinkSz
-- If we got the full amount, the file is too large
-- to be a symlink target.
let replaceindex = liftIO $ moveFile tmpindex realindex
let updatetmpindex = do
r' <- liftIO $ Git.Env.addGitEnv r Git.Index.indexEnv
+ . fromOsPath
=<< Git.Index.indexEnvVal tmpindex
configfilterprocess numsz $
runupdateindex tsd r' replaceindex
fdToHandle fd
in bracket open hClose readhandle
#else
- ifM (isSymbolicLink <$> R.getSymbolicLinkStatus (toRawFilePath f))
+ ifM (isSymbolicLink <$> R.getSymbolicLinkStatus (fromOsPath f))
( return Nothing
, F.withFile f ReadMode readhandle
)
-
- Also, can generate new metadata, if configured to do so.
-}
-genMetaData :: Key -> RawFilePath -> Maybe POSIXTime -> Annex ()
+genMetaData :: Key -> OsPath -> Maybe POSIXTime -> Annex ()
genMetaData key file mmtime = do
catKeyFileHEAD file >>= \case
Nothing -> noop
Nothing -> noop
where
warncopied = warning $ UnquotedString $
- "Copied metadata from old version of " ++ fromRawFilePath file ++ " to new version. " ++
- "If you don't want this copied metadata, run: git annex metadata --remove-all " ++ fromRawFilePath file
+ "Copied metadata from old version of " ++ fromOsPath file ++ " to new version. " ++
+ "If you don't want this copied metadata, run: git annex metadata --remove-all " ++ fromOsPath file
-- If the only fields copied were date metadata, and they'll
-- be overwritten with the current mtime, no need to warn about
-- copying.
import Annex.WorkerPool
import Types.WorkerPool
import Types.Key
+import qualified Utility.FileIO as F
import Control.Concurrent.STM
import Control.Concurrent.Async
import qualified Data.ByteString as S
#if WITH_INOTIFY
import qualified System.INotify as INotify
-import qualified System.FilePath.ByteString as P
#endif
shouldVerify :: VerifyConfig -> Annex Bool
- If the RetrievalSecurityPolicy requires verification and the key's
- backend doesn't support it, the verification will fail.
-}
-verifyKeyContentPostRetrieval :: RetrievalSecurityPolicy -> VerifyConfig -> Verification -> Key -> RawFilePath -> Annex Bool
+verifyKeyContentPostRetrieval :: RetrievalSecurityPolicy -> VerifyConfig -> Verification -> Key -> OsPath -> Annex Bool
verifyKeyContentPostRetrieval rsp v verification k f = case (rsp, verification) of
(_, Verified) -> return True
(RetrievalVerifiableKeysSecure, _) -> ifM (isVerifiable k)
-- When possible, does an incremental verification, because that can be
-- faster. Eg, the VURL backend can need to try multiple checksums and only
-- with an incremental verification does it avoid reading files twice.
-verifyKeyContent :: Key -> RawFilePath -> Annex Bool
+verifyKeyContent :: Key -> OsPath -> Annex Bool
verifyKeyContent k f = verifyKeySize k f <&&> verifyKeyContent' k f
-- Does not verify size.
-verifyKeyContent' :: Key -> RawFilePath -> Annex Bool
+verifyKeyContent' :: Key -> OsPath -> Annex Bool
verifyKeyContent' k f =
Backend.maybeLookupBackendVariety (fromKey keyVariety k) >>= \case
Nothing -> return True
iv <- mkiv k
showAction (UnquotedString (descIncrementalVerifier iv))
res <- liftIO $ catchDefaultIO Nothing $
- withBinaryFile (fromRawFilePath f) ReadMode $ \h -> do
+ F.withBinaryFile f ReadMode $ \h -> do
feedIncrementalVerifier h iv
finalizeIncrementalVerifier iv
case res of
Just verifier -> verifier k f
(Nothing, Just verifier) -> verifier k f
-resumeVerifyKeyContent :: Key -> RawFilePath -> IncrementalVerifier -> Annex Bool
+resumeVerifyKeyContent :: Key -> OsPath -> IncrementalVerifier -> Annex Bool
resumeVerifyKeyContent k f iv = liftIO (positionIncrementalVerifier iv) >>= \case
Nothing -> fallback
Just endpos -> do
| otherwise = do
showAction (UnquotedString (descIncrementalVerifier iv))
liftIO $ catchDefaultIO (Just False) $
- withBinaryFile (fromRawFilePath f) ReadMode $ \h -> do
+ F.withBinaryFile f ReadMode $ \h -> do
hSeek h AbsoluteSeek endpos
feedIncrementalVerifier h iv
finalizeIncrementalVerifier iv
where
chunk = 65536
-verifyKeySize :: Key -> RawFilePath -> Annex Bool
+verifyKeySize :: Key -> OsPath -> Annex Bool
verifyKeySize k f = case fromKey keySize k of
Just size -> do
size' <- liftIO $ catchDefaultIO 0 $ getFileSize f
-- and if the disk is slow, the reader may never catch up to the writer,
-- and the disk cache may never speed up reads. So this should only be
-- used when there's not a better way to incrementally verify.
-tailVerify :: Maybe IncrementalVerifier -> RawFilePath -> Annex a -> Annex a
+tailVerify :: Maybe IncrementalVerifier -> OsPath -> Annex a -> Annex a
tailVerify (Just iv) f writer = do
finished <- liftIO newEmptyTMVarIO
t <- liftIO $ async $ tailVerify' iv f finished
writer `finally` finishtail
tailVerify Nothing _ writer = writer
-tailVerify' :: IncrementalVerifier -> RawFilePath -> TMVar () -> IO ()
+tailVerify' :: IncrementalVerifier -> OsPath -> TMVar () -> IO ()
#if WITH_INOTIFY
tailVerify' iv f finished =
tryNonAsync go >>= \case
-- of resuming, and waiting for modification deals with such
-- situations.
inotifydirchange i cont =
- INotify.addWatch i [INotify.Modify] dir $ \case
+ INotify.addWatch i [INotify.Modify] (fromOsPath dir) $ \case
-- Ignore changes to other files in the directory.
INotify.Modified { INotify.maybeFilePath = fn }
- | fn == Just basef -> cont
+ | fn == Just basef' -> cont
_ -> noop
where
- (dir, basef) = P.splitFileName f
+ (dir, basef) = splitFileName f
+ basef' = fromOsPath basef
- inotifyfilechange i = INotify.addWatch i [INotify.Modify] f . const
+ inotifyfilechange i = INotify.addWatch i [INotify.Modify] (fromOsPath f) . const
go = INotify.withINotify $ \i -> do
modified <- newEmptyTMVarIO
case v of
Just () -> do
r <- tryNonAsync $
- tryWhenExists (openBinaryFile (fromRawFilePath f) ReadMode) >>= \case
+ tryWhenExists (F.openBinaryFile f ReadMode) >>= \case
Just h -> return (Just h)
-- File does not exist, must have been
-- deleted. Wait for next modification
- When in an adjusted branch that may have hidden the file, looks for a
- pointer to a key in the original branch.
-}
-lookupKey :: RawFilePath -> Annex (Maybe Key)
+lookupKey :: OsPath -> Annex (Maybe Key)
lookupKey = lookupKey' catkeyfile
where
catkeyfile file =
- ifM (liftIO $ doesFileExist $ fromRawFilePath file)
+ ifM (liftIO $ doesFileExist file)
( catKeyFile file
, catKeyFileHidden file =<< getCurrentBranch
)
- changes in the work tree. This means it's slower, but it also has
- consistently the same behavior for locked files as for unlocked files.
-}
-lookupKeyStaged :: RawFilePath -> Annex (Maybe Key)
+lookupKeyStaged :: OsPath -> Annex (Maybe Key)
lookupKeyStaged file = catKeyFile file >>= \case
Just k -> return (Just k)
Nothing -> catKeyFileHidden file =<< getCurrentBranch
{- Like lookupKey, but does not find keys for hidden files. -}
-lookupKeyNotHidden :: RawFilePath -> Annex (Maybe Key)
+lookupKeyNotHidden :: OsPath -> Annex (Maybe Key)
lookupKeyNotHidden = lookupKey' catkeyfile
where
catkeyfile file =
- ifM (liftIO $ doesFileExist $ fromRawFilePath file)
+ ifM (liftIO $ doesFileExist file)
( catKeyFile file
, return Nothing
)
-lookupKey' :: (RawFilePath -> Annex (Maybe Key)) -> RawFilePath -> Annex (Maybe Key)
+lookupKey' :: (OsPath -> Annex (Maybe Key)) -> OsPath -> Annex (Maybe Key)
lookupKey' catkeyfile file = isAnnexLink file >>= \case
Just key -> return (Just key)
Nothing -> catkeyfile file
getBackend file k = maybeLookupBackendVariety (fromKey keyVariety k) >>= \case
Just backend -> return $ Just backend
Nothing -> do
- warning $ "skipping " <> QuotedPath (toRawFilePath file) <> " (" <>
+ warning $ "skipping " <> QuotedPath (toOsPath file) <> " (" <>
UnquotedString (unknownBackendVarietyMessage (fromKey keyVariety k)) <> ")"
return Nothing
{- Looks up the backend that should be used for a file.
- That can be configured on a per-file basis in the gitattributes file,
- or forced with --backend. -}
-chooseBackend :: RawFilePath -> Annex Backend
+chooseBackend :: OsPath -> Annex Backend
chooseBackend f = Annex.getRead Annex.forcebackend >>= go
where
go Nothing = do
| otherwise = return Nothing
-- The Backend must use a cryptographically secure hash.
-generateEquivilantKey :: Backend -> RawFilePath -> Annex (Maybe Key)
+generateEquivilantKey :: Backend -> OsPath -> Annex (Maybe Key)
generateEquivilantKey b f =
case genKey b of
Just genkey -> do
import qualified Git.DiffTree as DiffTree
import Logs
import qualified Logs.ContentIdentifier as Log
-import qualified Utility.RawFilePath as R
import Database.Persist.Sql hiding (Key)
import Database.Persist.TH
-import qualified System.FilePath.ByteString as P
#if MIN_VERSION_persistent_sqlite(2,13,3)
import Database.RawFilePath
openDb = do
dbdir <- calcRepo' gitAnnexContentIdentifierDbDir
let db = dbdir </> literalOsPath "db"
- isnew <- liftIO $ not <$> doesDirectoryPathExist db
+ -- "db" is the sqlite database file (see runSqlite' below),
+ -- so test for a file, not a directory.
+ isnew <- liftIO $ not <$> doesFileExist db
if isnew
then initDb db $ void $
runMigrationSilent migrateContentIdentifier
-- Migrate from old versions of database, which had buggy
-- and suboptimal uniqueness constraints.
#if MIN_VERSION_persistent_sqlite(2,13,3)
- else liftIO $ runSqlite' db $ void $
+ else liftIO $ runSqlite' (fromOsPath db) $ void $
runMigrationSilent migrateContentIdentifier
#else
- else liftIO $ runSqlite (T.pack (fromRawFilePath db)) $ void $
+ -- db is an OsPath now; fromRawFilePath no longer typechecks here
+ else liftIO $ runSqlite (T.pack (fromOsPath db)) $ void $
import Git.Sha
import Git.FilePath
import qualified Git.DiffTree
-import qualified Utility.RawFilePath as R
import Database.Persist.Sql hiding (Key)
import Database.Persist.TH
-import qualified System.FilePath.ByteString as P
data ExportHandle = ExportHandle H.DbQueue UUID
openDb :: UUID -> Annex ExportHandle
openDb u = do
dbdir <- calcRepo' (gitAnnexExportDbDir u)
- let db = dbdir P.</> "db"
- unlessM (liftIO $ R.doesPathExist db) $ do
+ let db = dbdir </> literalOsPath "db"
+ -- "db" is the sqlite database file; the old code used
+ -- R.doesPathExist, so the OsPath equivalent is doesFileExist,
+ -- not doesDirectoryExist (which would always be False here).
+ unlessM (liftIO $ doesFileExist db) $ do
initDb db $ void $
runMigrationSilent migrateExport
h <- liftIO $ H.openDbQueue db "exported"
addExportedLocation h k el = queueDb h $ do
void $ insertUniqueFast $ Exported k ef
let edirs = map
- (\ed -> ExportedDirectory (SByteString (fromExportDirectory ed)) ef)
+ (\ed -> ExportedDirectory (SByteString (fromOsPath (fromExportDirectory ed))) ef)
(exportDirectories el)
putMany edirs
where
- ef = SByteString (fromExportLocation el)
+ ef = SByteString (fromOsPath (fromExportLocation el))
removeExportedLocation :: ExportHandle -> Key -> ExportLocation -> IO ()
removeExportedLocation h k el = queueDb h $ do
deleteWhere [ExportedKey ==. k, ExportedFile ==. ef]
- let subdirs = map (SByteString . fromExportDirectory)
+ let subdirs = map
+ (SByteString . fromOsPath . fromExportDirectory)
(exportDirectories el)
deleteWhere [ExportedDirectoryFile ==. ef, ExportedDirectorySubdir <-. subdirs]
where
- ef = SByteString (fromExportLocation el)
+ ef = SByteString (fromOsPath (fromExportLocation el))
{- Note that this does not see recently queued changes. -}
getExportedLocation :: ExportHandle -> Key -> IO [ExportLocation]
getExportedLocation (ExportHandle h _) k = H.queryDbQueue h $ do
l <- selectList [ExportedKey ==. k] []
- return $ map (mkExportLocation . (\(SByteString f) -> f) . exportedFile . entityVal) l
+ return $ map (mkExportLocation . (\(SByteString f) -> toOsPath f) . exportedFile . entityVal) l
{- Note that this does not see recently queued changes. -}
isExportDirectoryEmpty :: ExportHandle -> ExportDirectory -> IO Bool
l <- selectList [ExportedDirectorySubdir ==. ed] []
return $ null l
where
- ed = SByteString $ fromExportDirectory d
+ ed = SByteString $ fromOsPath $ fromExportDirectory d
{- Get locations in the export that might contain a key. -}
getExportTree :: ExportHandle -> Key -> IO [ExportLocation]
getExportTree (ExportHandle h _) k = H.queryDbQueue h $ do
l <- selectList [ExportTreeKey ==. k] []
- return $ map (mkExportLocation . (\(SByteString f) -> f) . exportTreeFile . entityVal) l
+ return $ map (mkExportLocation . (\(SByteString f) -> toOsPath f) . exportTreeFile . entityVal) l
{- Get keys that might be currently exported to a location.
-
map (exportTreeKey . entityVal)
<$> selectList [ExportTreeFile ==. ef] []
where
- ef = SByteString (fromExportLocation el)
+ ef = SByteString (fromOsPath (fromExportLocation el))
addExportTree :: ExportHandle -> Key -> ExportLocation -> IO ()
addExportTree h k loc = queueDb h $
void $ insertUniqueFast $ ExportTree k ef
where
- ef = SByteString (fromExportLocation loc)
+ ef = SByteString (fromOsPath (fromExportLocation loc))
removeExportTree :: ExportHandle -> Key -> ExportLocation -> IO ()
removeExportTree h k loc = queueDb h $
deleteWhere [ExportTreeKey ==. k, ExportTreeFile ==. ef]
where
- ef = SByteString (fromExportLocation loc)
+ ef = SByteString (fromOsPath (fromExportLocation loc))
-- An action that is passed the old and new values that were exported,
-- and updates state.
import Types.MetaData
import Annex.MetaData.StandardFields
import Annex.LockFile
-import qualified Utility.RawFilePath as R
import Database.Persist.Sql hiding (Key)
import Database.Persist.TH
-import qualified System.FilePath.ByteString as P
import qualified Data.ByteString as B
import qualified Data.Set as S
openDb :: Annex ImportFeedDbHandle
openDb = do
dbdir <- calcRepo' gitAnnexImportFeedDbDir
- let db = dbdir P.</> "db"
- isnew <- liftIO $ not <$> R.doesPathExist db
+ let db = dbdir </> literalOsPath "db"
+ -- "db" is the sqlite database file; the old code used
+ -- R.doesPathExist, so test for a file, not a directory, or
+ -- initDb would be re-run on every open of an existing db.
+ isnew <- liftIO $ not <$> doesFileExist db
when isnew $
initDb db $ void $
runMigrationSilent migrateImportFeed
import qualified Git.Ref
import Config
import Config.Smudge
-import qualified Utility.RawFilePath as R
+import qualified Utility.OsString as OS
import qualified Data.ByteString as S
import qualified Data.ByteString.Char8 as S8
-import qualified System.FilePath.ByteString as P
import Control.Concurrent.Async
{- Runs an action that reads from the database.
lck <- calcRepo' gitAnnexKeysDbLock
catchPermissionDenied permerr $ withExclusiveLock lck $ do
dbdir <- calcRepo' gitAnnexKeysDbDir
- let db = dbdir P.</> "db"
- dbexists <- liftIO $ R.doesPathExist db
+ let db = dbdir </> literalOsPath "db"
+ -- "db" is the sqlite database file; the old code used
+ -- R.doesPathExist, so the OsPath equivalent is doesFileExist,
+ -- not doesDirectoryExist (always False for a file).
+ dbexists <- liftIO $ doesFileExist db
case dbexists of
True -> open db False
False -> do
)
{- Include a known associated file along with any recorded in the database. -}
-getAssociatedFilesIncluding :: AssociatedFile -> Key -> Annex [RawFilePath]
+getAssociatedFilesIncluding :: AssociatedFile -> Key -> Annex [OsPath]
getAssociatedFilesIncluding afile k = emptyWhenBare $ do
g <- Annex.gitRepo
l <- map (`fromTopFilePath` g) <$> getAssociatedFiles k
SQL.removeAssociatedFile k
{- Stats the files, and stores their InodeCaches. -}
-storeInodeCaches :: Key -> [RawFilePath] -> Annex ()
+storeInodeCaches :: Key -> [OsPath] -> Annex ()
storeInodeCaches k fs = withTSDelta $ \d ->
addInodeCaches k . catMaybes
=<< liftIO (mapM (\f -> genInodeCache f d) fs)
( return mempty
, do
gitindex <- inRepo currentIndexFile
- indexcache <- fromRawFilePath <$> calcRepo' gitAnnexKeysDbIndexCache
+ indexcache <- fromOsPath <$> calcRepo' gitAnnexKeysDbIndexCache
withTSDelta (liftIO . genInodeCache gitindex) >>= \case
Just cur -> readindexcache indexcache >>= \case
Nothing -> go cur indexcache =<< getindextree
-- be a pointer file. And a pointer file that is replaced with
-- a non-pointer file will match this. This is only a
-- prefilter so that's ok.
- , Param $ "-G" ++ fromRawFilePath (toInternalGitPath $
- P.pathSeparator `S.cons` objectDir)
+ , Param $ "-G" ++
+ fromOsPath (toInternalGitPath $
+ pathSeparator `OS.cons` objectDir)
-- Disable rename detection.
, Param "--no-renames"
-- Avoid other complications.
procdiff mdfeeder (info:file:rest) conflicted
| ":" `S.isPrefixOf` info = case S8.words info of
(_colonsrcmode:dstmode:srcsha:dstsha:status:[]) -> do
+ let file' = asTopFilePath (toOsPath file)
let conflicted' = status == "U"
-- avoid removing associated file when
-- there is a merge conflict
send mdfeeder (Ref srcsha) $ \case
Just oldkey -> do
liftIO $ SQL.removeAssociatedFile oldkey
- (asTopFilePath file)
- (SQL.WriteHandle qh)
+ file' (SQL.WriteHandle qh)
return True
Nothing -> return False
send mdfeeder (Ref dstsha) $ \case
Just key -> do
liftIO $ addassociatedfile key
- (asTopFilePath file)
- (SQL.WriteHandle qh)
+ file' (SQL.WriteHandle qh)
when (dstmode /= fmtTreeItemType TreeSymlink) $
- reconcilepointerfile (asTopFilePath file) key
+ reconcilepointerfile file' key
return True
Nothing -> return False
procdiff mdfeeder rest
procmergeconflictdiff mdfeeder (info:file:rest) conflicted
| ":" `S.isPrefixOf` info = case S8.words info of
(_colonmode:_mode:sha:_sha:status:[]) -> do
+ let file' = asTopFilePath (toOsPath file)
send mdfeeder (Ref sha) $ \case
Just key -> do
liftIO $ SQL.addAssociatedFile key
- (asTopFilePath file)
- (SQL.WriteHandle qh)
+ file' (SQL.WriteHandle qh)
return True
Nothing -> return False
let conflicted' = status == "U"
- convenience.
-}
pipeNullSplit' :: [CommandParam] -> Repo -> IO ([S.ByteString], IO Bool)
-pipeNullSplit' params repo = do
+pipeNullSplit' = pipeNullSplit'' id
+
+{- Like pipeNullSplit', but runs a conversion function on each of
+ - the strict ByteStrings it outputs. -}
+pipeNullSplit'' :: (S.ByteString -> t) -> [CommandParam] -> Repo -> IO ([t], IO Bool)
+pipeNullSplit'' f params repo = do
(s, cleanup) <- pipeNullSplit params repo
- return (map L.toStrict s, cleanup)
+ return (map (f . L.toStrict) s, cleanup)
pipeNullSplitStrict :: [CommandParam] -> Repo -> IO [S.ByteString]
pipeNullSplitStrict params repo = do
-
- So, an absolute path is the only safe option for this to return.
-}
-indexEnvVal :: OsPath -> IO String
-indexEnvVal p = fromOsPath <$> absPath p
+indexEnvVal :: OsPath -> IO OsPath
+indexEnvVal p = absPath p
{- Forces git to use the specified index file.
-
override index _r = do
res <- getEnv var
val <- indexEnvVal index
- setEnv var val True
+ setEnv var (fromOsPath val) True
return $ reset res
where
var = "GIT_INDEX_FILE"
data LoggedFileChange t = LoggedFileChange
{ changetime :: POSIXTime
, changed :: t
- , changedfile :: FilePath
+ , changedfile :: OsPath
, oldref :: Ref
, newref :: Ref
}
-> Maybe Ref
-> [FilePath]
-> [CommandParam]
- -> (Sha -> FilePath -> Maybe t)
+ -> (Sha -> OsPath -> Maybe t)
-> Repo
-> IO ([LoggedFileChange t], IO Bool)
getGitLog ref stopref fs os selector repo = do
--
-- The commitinfo is not included before all changelines, so
-- keep track of the most recently seen commitinfo.
-parseGitRawLog :: (Ref -> FilePath -> Maybe t) -> [String] -> [LoggedFileChange t]
+parseGitRawLog :: (Ref -> OsPath -> Maybe t) -> [String] -> [LoggedFileChange t]
parseGitRawLog selector = parse (deleteSha, epoch)
where
epoch = toEnum 0 :: POSIXTime
_ -> (oldcommitsha, oldts, cl')
mrc = do
(old, new) <- parseRawChangeLine cl
- v <- selector commitsha c2
+ let c2' = toOsPath c2
+ v <- selector commitsha c2'
return $ LoggedFileChange
{ changetime = ts
, changed = v
- , changedfile = c2
+ , changedfile = c2'
, oldref = old
, newref = new
}
- Note that this uses a --debug option whose output could change at some
- point in the future. If the output is not as expected, will use Nothing.
-}
-inodeCaches :: [OsPath] -> Repo -> IO ([(FilePath, Maybe InodeCache)], IO Bool)
+inodeCaches :: [OsPath] -> Repo -> IO ([(OsPath, Maybe InodeCache)], IO Bool)
inodeCaches locs repo = guardSafeForLsFiles repo $ do
(ls, cleanup) <- pipeNullSplit params repo
return (parse Nothing (map decodeBL ls), cleanup)
parse Nothing (f:ls) = parse (Just f) ls
parse (Just f) (s:[]) =
let i = parsedebug s
- in (f, i) : []
+ in (toOsPath f, i) : []
parse (Just f) (s:ls) =
let (d, f') = splitdebug s
i = parsedebug d
- in (f, i) : parse (Just f') ls
+ in (toOsPath f, i) : parse (Just f') ls
parse _ _ = []
-- First 5 lines are --debug output, remainder is the next filename.
getExportExcluded u = do
logf <- fromRepo $ gitAnnexExportExcludeLog u
liftIO $ catchDefaultIO [] $ exportExcludedParser
- <$> F.readFile (toOsPath logf)
+ <$> F.readFile logf
where
exportExcludedParser :: L.ByteString -> [Git.Tree.TreeItem]
map (toUUID . fromLogInfo . info)
(filterPresent (parseLog l))
-getLoggedLocations :: (RawFilePath -> Annex [LogInfo]) -> Key -> Annex [UUID]
+getLoggedLocations :: (OsPath -> Annex [LogInfo]) -> Key -> Annex [UUID]
getLoggedLocations getter key = do
config <- Annex.getGitConfig
locs <- map (toUUID . fromLogInfo) <$> getter (locationLogFile config key)
changedlocs _ _ _ Nothing = pure (S.empty, S.empty)
overLocationLogsHelper
- :: ((RawFilePath -> Maybe Key) -> (Annex (FileContents Key b) -> Annex v) -> Annex a)
- -> ((Maybe L.ByteString -> [UUID]) -> Key -> RawFilePath -> Maybe (L.ByteString, Maybe b) -> Annex u)
+ :: ((OsPath -> Maybe Key) -> (Annex (FileContents Key b) -> Annex v) -> Annex a)
+ -> ((Maybe L.ByteString -> [UUID]) -> Key -> OsPath -> Maybe (L.ByteString, Maybe b) -> Annex u)
-> Bool
-> v
-> (Annex (FileContents Key b) -> Annex v -> Annex v)
getCurrentMetaData :: Key -> Annex MetaData
getCurrentMetaData = getCurrentMetaData' metaDataLogFile
-getCurrentMetaData' :: (GitConfig -> Key -> RawFilePath) -> Key -> Annex MetaData
+getCurrentMetaData' :: (GitConfig -> Key -> OsPath) -> Key -> Annex MetaData
getCurrentMetaData' getlogfile k = do
config <- Annex.getGitConfig
parseCurrentMetaData <$> Annex.Branch.get (getlogfile config k)
addMetaData :: Key -> MetaData -> Annex ()
addMetaData = addMetaData' (Annex.Branch.RegardingUUID []) metaDataLogFile
-addMetaData' :: Annex.Branch.RegardingUUID -> (GitConfig -> Key -> RawFilePath) -> Key -> MetaData -> Annex ()
+addMetaData' :: Annex.Branch.RegardingUUID -> (GitConfig -> Key -> OsPath) -> Key -> MetaData -> Annex ()
addMetaData' ru getlogfile k metadata =
addMetaDataClocked' ru getlogfile k metadata =<< currentVectorClock
addMetaDataClocked :: Key -> MetaData -> CandidateVectorClock -> Annex ()
addMetaDataClocked = addMetaDataClocked' (Annex.Branch.RegardingUUID []) metaDataLogFile
-addMetaDataClocked' :: Annex.Branch.RegardingUUID -> (GitConfig -> Key -> RawFilePath) -> Key -> MetaData -> CandidateVectorClock -> Annex ()
+addMetaDataClocked' :: Annex.Branch.RegardingUUID -> (GitConfig -> Key -> OsPath) -> Key -> MetaData -> CandidateVectorClock -> Annex ()
addMetaDataClocked' ru getlogfile k d@(MetaData m) c
| d == emptyMetaData = noop
| otherwise = do
(const $ buildLog l)
return True
-readLog :: RawFilePath -> Annex (Log MetaData)
+readLog :: OsPath -> Annex (Log MetaData)
readLog = parseLog <$$> Annex.Branch.get
import Logs.File
import Logs
import Annex.CatFile
+import qualified Utility.OsString as OS
-import qualified Data.ByteString as B
import qualified Data.ByteString.Lazy as L
import Control.Concurrent.STM
-import System.FilePath.ByteString as P
-- | What to use to record a migration. This should be the same Sha that is
-- used to as the content of the annexed file in the HEAD branch.
n <- readTVar nv
let !n' = succ n
writeTVar nv n'
- return (asTopFilePath (encodeBS (show n')))
+ return (asTopFilePath (toOsPath (show n')))
let rec h r = liftIO $ sendMkTree h
(fromTreeItemType TreeFile)
BlobObject
n <- liftIO $ atomically $ readTVar nv
when (n > 0) $ do
treesha <- liftIO $ flip recordTree g $ Tree
- [ RecordedSubTree (asTopFilePath "old") oldt []
- , RecordedSubTree (asTopFilePath "new") newt []
+ [ RecordedSubTree (asTopFilePath (literalOsPath "old")) oldt []
+ , RecordedSubTree (asTopFilePath (literalOsPath "new")) newt []
]
commitsha <- Annex.Branch.rememberTreeish treesha
(asTopFilePath migrationTreeGraftPoint)
(stoppoint, toskip) <- getPerformedMigrations
(l, cleanup) <- inRepo $ getGitLog branchsha
(if incremental then stoppoint else Nothing)
- [fromRawFilePath migrationTreeGraftPoint]
+ [fromOsPath migrationTreeGraftPoint]
-- Need to follow because migrate.tree is grafted in
-- and then deleted, and normally git log stops when a file
-- gets deleted.
go toskip c
| newref c `elem` nullShas = return ()
| changed c `elem` toskip = return ()
- | not ("/new/" `B.isInfixOf` newfile) = return ()
+ | not (literalOsPath "/new/" `OS.isInfixOf` newfile) = return ()
| otherwise =
catKey (newref c) >>= \case
Nothing -> return ()
Nothing -> return ()
Just oldkey -> a oldkey newkey
where
- newfile = toRawFilePath (changedfile c)
+ newfile = changedfile c
oldfile = migrationTreeGraftPoint
- P.</> "old"
- P.</> P.takeBaseName (fromInternalGitPath newfile)
+ </> literalOsPath "old"
+ </> takeBaseName (fromInternalGitPath newfile)
oldfileref = branchFileRef (changed c) oldfile
getPerformedMigrations :: Annex (Maybe Sha, [Sha])
setLog requiredContentLog u expr
Annex.changeState $ \st -> st { Annex.requiredcontentmap = Nothing }
-setLog :: RawFilePath -> UUID -> PreferredContentExpression -> Annex ()
+setLog :: OsPath -> UUID -> PreferredContentExpression -> Annex ()
setLog logfile uuid@(UUID _) val = do
c <- currentVectorClock
Annex.Branch.change (Annex.Branch.RegardingUUID [uuid]) logfile $
import qualified Data.ByteString.Lazy as L
{- Adds to the log, removing any LogLines that are obsoleted. -}
-addLog :: Annex.Branch.RegardingUUID -> RawFilePath -> LogStatus -> LogInfo -> Annex ()
+addLog :: Annex.Branch.RegardingUUID -> OsPath -> LogStatus -> LogInfo -> Annex ()
addLog ru file logstatus loginfo =
addLog' ru file logstatus loginfo =<< currentVectorClock
-addLog' :: Annex.Branch.RegardingUUID -> RawFilePath -> LogStatus -> LogInfo -> CandidateVectorClock -> Annex ()
+addLog' :: Annex.Branch.RegardingUUID -> OsPath -> LogStatus -> LogInfo -> CandidateVectorClock -> Annex ()
addLog' ru file logstatus loginfo c =
Annex.Branch.changeOrAppend ru file $ \b ->
let old = parseLog b
- When the log was changed, the onchange action is run (with the journal
- still locked to prevent any concurrent changes) and True is returned.
-}
-maybeAddLog :: Annex.Branch.RegardingUUID -> RawFilePath -> LogStatus -> LogInfo -> Annex () -> Annex Bool
+maybeAddLog :: Annex.Branch.RegardingUUID -> OsPath -> LogStatus -> LogInfo -> Annex () -> Annex Bool
maybeAddLog ru file logstatus loginfo onchange = do
c <- currentVectorClock
let f = \b ->
{- Reads a log file.
- Note that the LogLines returned may be in any order. -}
-readLog :: RawFilePath -> Annex [LogLine]
+readLog :: OsPath -> Annex [LogLine]
readLog = parseLog <$$> Annex.Branch.get
{- Reads a log and returns only the info that is still present. -}
-presentLogInfo :: RawFilePath -> Annex [LogInfo]
+presentLogInfo :: OsPath -> Annex [LogInfo]
presentLogInfo file = map info . filterPresent <$> readLog file
{- Reads a log and returns only the info that is no longer present. -}
-notPresentLogInfo :: RawFilePath -> Annex [LogInfo]
+notPresentLogInfo :: OsPath -> Annex [LogInfo]
notPresentLogInfo file = map info . filterNotPresent <$> readLog file
{- Reads a historical version of a log and returns the info that was in
-
- The date is formatted as shown in gitrevisions man page.
-}
-historicalLogInfo :: RefDate -> RawFilePath -> Annex [LogInfo]
+historicalLogInfo :: RefDate -> OsPath -> Annex [LogInfo]
historicalLogInfo refdate file = parseLogInfo
<$> Annex.Branch.getHistorical refdate file
getLastRunTimes :: Annex (M.Map ScheduledActivity LocalTime)
getLastRunTimes = do
- f <- fromRawFilePath <$> fromRepo gitAnnexScheduleState
+ f <- fromOsPath <$> fromRepo gitAnnexScheduleState
liftIO $ fromMaybe M.empty
<$> catchDefaultIO Nothing (readish <$> readFile f)
import qualified Data.Set as S
-readLog :: (Ord v, SingleValueSerializable v) => RawFilePath -> Annex (Log v)
+readLog :: (Ord v, SingleValueSerializable v) => OsPath -> Annex (Log v)
readLog = parseLog <$$> Annex.Branch.get
-getLog :: (Ord v, SingleValueSerializable v) => RawFilePath -> Annex (Maybe v)
+getLog :: (Ord v, SingleValueSerializable v) => OsPath -> Annex (Maybe v)
getLog = newestValue <$$> readLog
-setLog :: (Ord v, SingleValueSerializable v) => Annex.Branch.RegardingUUID -> RawFilePath -> v -> Annex ()
+setLog :: (Ord v, SingleValueSerializable v) => Annex.Branch.RegardingUUID -> OsPath -> v -> Annex ()
setLog ru f v = do
c <- currentVectorClock
Annex.Branch.change ru f $ \old ->
import Annex.SpecialRemote.Config
import Annex.Verify
import qualified Utility.RawFilePath as R
+import qualified Utility.FileIO as F
import qualified Data.ByteString as S
import qualified Data.ByteString.Lazy as L
-> ChunkConfig
-> EncKey
-> Key
- -> FilePath
+ -> OsPath
-> MeterUpdate
-> Maybe (Cipher, EncKey)
-> encc
-- possible without this check.
(UnpaddedChunks chunksize) -> ifM (isStableKey k)
( do
- h <- liftIO $ openBinaryFile f ReadMode
+ h <- liftIO $ F.openBinaryFile f ReadMode
go chunksize h
liftIO $ hClose h
, storechunk k (FileContent f) p
-> ChunkConfig
-> EncKey
-> Key
- -> FilePath
+ -> OsPath
-> MeterUpdate
-> Maybe (Cipher, EncKey)
-> encc
where
go pe cks = do
let ls = map chunkKeyList cks
- currsize <- liftIO $ catchMaybeIO $ getFileSize (toRawFilePath dest)
+ currsize <- liftIO $ catchMaybeIO $ getFileSize dest
let ls' = maybe ls (setupResume ls) currsize
if any null ls'
-- dest is already complete
-- passing the whole file content to the
-- incremental verifier though.
Nothing -> do
- retriever (encryptor basek) basep (toRawFilePath dest) iv $
+ retriever (encryptor basek) basep dest iv $
retrieved iv Nothing basep
return $ case iv of
Nothing -> Right iv
opennew = do
iv <- startVerifyKeyContentIncrementally vc basek
- h <- liftIO $ openBinaryFile dest WriteMode
+ h <- liftIO $ F.openBinaryFile dest WriteMode
return (h, iv)
-- Open the file and seek to the start point in order to resume.
openresume startpoint = do
-- ReadWriteMode allows seeking; AppendMode does not.
- h <- liftIO $ openBinaryFile dest ReadWriteMode
+ h <- liftIO $ F.openBinaryFile dest ReadWriteMode
liftIO $ hSeek h AbsoluteSeek startpoint
-- No incremental verification when resuming, since that
-- would need to read up to the startpoint.
-}
writeRetrievedContent
:: LensEncParams encc
- => FilePath
+ => OsPath
-> Maybe (Cipher, EncKey)
-> encc
-> Maybe Handle
writeRetrievedContent dest enc encc mh mp content miv = case (enc, mh, content) of
(Nothing, Nothing, FileContent f)
| f == dest -> noop
- | otherwise -> liftIO $ moveFile (toRawFilePath f) (toRawFilePath dest)
+ | otherwise -> liftIO $ moveFile f dest
(Just (cipher, _), _, ByteContent b) -> do
cmd <- gpgCmd <$> Annex.getGitConfig
decrypt cmd encc cipher (feedBytes b) $
withBytes content $ \b ->
decrypt cmd encc cipher (feedBytes b) $
readBytes write
- liftIO $ removeWhenExistsWith R.removeLink (toRawFilePath f)
+ liftIO $ removeWhenExistsWith R.removeLink (fromOsPath f)
(Nothing, _, FileContent f) -> do
withBytes content write
- liftIO $ removeWhenExistsWith R.removeLink (toRawFilePath f)
+ liftIO $ removeWhenExistsWith R.removeLink (fromOsPath f)
(Nothing, _, ByteContent b) -> write b
where
write b = case mh of
Nothing -> S.hPut h
in meteredWrite p writer b
Nothing -> L.hPut h b
- opendest = openBinaryFile dest WriteMode
+ opendest = F.openBinaryFile dest WriteMode
{- Can resume when the chunk's offset is at or before the end of
- the dest file. -}
withBytes :: ContentSource -> (L.ByteString -> Annex a) -> Annex a
withBytes (ByteContent b) a = a b
-withBytes (FileContent f) a = a =<< liftIO (L.readFile f)
+withBytes (FileContent f) a = a =<< liftIO (L.readFile (fromOsPath f))
when (null stored) $
giveup "no chunks were stored"
where
- basef = tmp ++ fromRawFilePath (keyFile key)
+ basef = tmp ++ fromOsPath (keyFile key)
tmpdests = map (basef ++ ) chunkStream
{- Given a list of destinations to use, chunks the data according to the
import System.PosixCompat.Files (modificationTime)
import qualified Data.Map as M
import qualified Data.Set as S
-import qualified System.FilePath.ByteString as P
repoCheap :: Git.Repo -> Bool
repoCheap = not . Git.repoIsUrl
-localpathCalc :: Git.Repo -> Maybe FilePath
+localpathCalc :: Git.Repo -> Maybe OsPath
localpathCalc r
| not (Git.repoIsLocal r) && not (Git.repoIsLocalUnknown r) = Nothing
- | otherwise = Just $ fromRawFilePath $ Git.repoPath r
+ | otherwise = Just $ Git.repoPath r
{- Checks relatively inexpensively if a repository is available for use. -}
repoAvail :: Git.Repo -> Annex Availability
gitRepoInfo :: Remote -> Annex [(String, String)]
gitRepoInfo r = do
d <- fromRepo Git.localGitDir
- mtimes <- liftIO $ mapM (\p -> modificationTime <$> R.getFileStatus p)
- =<< emptyWhenDoesNotExist (dirContentsRecursive (d P.</> "refs" P.</> "remotes" P.</> encodeBS (Remote.name r)))
+ let refsdir = d </> literalOsPath "refs"
+ </> literalOsPath "remotes"
+ </> toOsPath (Remote.name r)
+ mtimes <- liftIO $ mapM (\p -> modificationTime <$> R.getFileStatus (fromOsPath p))
+ =<< emptyWhenDoesNotExist (dirContentsRecursive refsdir)
let lastsynctime = case mtimes of
[] -> "never"
_ -> show $ posixSecondsToUTCTime $ realToFrac $ maximum mtimes
-- A source of a Key's content.
data ContentSource
- = FileContent FilePath
+ = FileContent OsPath
| ByteContent L.ByteString
isByteContent :: ContentSource -> Bool
-- content to the verifier before running the callback.
-- This should not be done when it retrieves ByteContent.
type Retriever = forall a.
- Key -> MeterUpdate -> RawFilePath -> Maybe IncrementalVerifier
+ Key -> MeterUpdate -> OsPath -> Maybe IncrementalVerifier
-> (ContentSource -> Annex a) -> Annex a
-- Action that removes a Key's content from a remote.
import qualified Utility.RawFilePath as R
import qualified Utility.FileIO as F
-import qualified System.FilePath.ByteString as P
import System.PosixCompat.Files (isSymbolicLink)
upgrade :: Bool -> Annex UpgradeResult
-- The old content identifier database is deleted here, but the
-- new database is not populated. It will be automatically
-- populated from the git-annex branch the next time it is used.
- removeOldDb . fromRawFilePath =<< fromRepo gitAnnexContentIdentifierDbDirOld
- liftIO . removeWhenExistsWith R.removeLink
+ removeOldDb =<< fromRepo gitAnnexContentIdentifierDbDirOld
+ liftIO . removeWhenExistsWith (R.removeLink . fromOsPath)
=<< fromRepo gitAnnexContentIdentifierLockOld
-- The export databases are deleted here. The new databases
-- will be populated by the next thing that needs them, the same
-- way as they would be in a fresh clone.
- removeOldDb . fromRawFilePath =<< calcRepo' gitAnnexExportDir
+ removeOldDb =<< calcRepo' gitAnnexExportDir
populateKeysDb
- removeOldDb . fromRawFilePath =<< fromRepo gitAnnexKeysDbOld
- liftIO . removeWhenExistsWith R.removeLink
+ removeOldDb =<< fromRepo gitAnnexKeysDbOld
+ liftIO . removeWhenExistsWith (R.removeLink . fromOsPath)
=<< fromRepo gitAnnexKeysDbIndexCacheOld
- liftIO . removeWhenExistsWith R.removeLink
+ liftIO . removeWhenExistsWith (R.removeLink . fromOsPath)
=<< fromRepo gitAnnexKeysDbLockOld
updateSmudgeFilter
return UpgradeSuccess
-gitAnnexKeysDbOld :: Git.Repo -> RawFilePath
-gitAnnexKeysDbOld r = gitAnnexDir r P.</> "keys"
+gitAnnexKeysDbOld :: Git.Repo -> OsPath
+gitAnnexKeysDbOld r = gitAnnexDir r </> literalOsPath "keys"
-gitAnnexKeysDbLockOld :: Git.Repo -> RawFilePath
-gitAnnexKeysDbLockOld r = gitAnnexKeysDbOld r <> ".lck"
+gitAnnexKeysDbLockOld :: Git.Repo -> OsPath
+gitAnnexKeysDbLockOld r =
+ gitAnnexKeysDbOld r <> literalOsPath ".lck"
-gitAnnexKeysDbIndexCacheOld :: Git.Repo -> RawFilePath
-gitAnnexKeysDbIndexCacheOld r = gitAnnexKeysDbOld r <> ".cache"
+gitAnnexKeysDbIndexCacheOld :: Git.Repo -> OsPath
+gitAnnexKeysDbIndexCacheOld r =
+ gitAnnexKeysDbOld r <> literalOsPath ".cache"
-gitAnnexContentIdentifierDbDirOld :: Git.Repo -> RawFilePath
-gitAnnexContentIdentifierDbDirOld r = gitAnnexDir r P.</> "cids"
+gitAnnexContentIdentifierDbDirOld :: Git.Repo -> OsPath
+gitAnnexContentIdentifierDbDirOld r =
+ gitAnnexDir r </> literalOsPath "cids"
-gitAnnexContentIdentifierLockOld :: Git.Repo -> RawFilePath
-gitAnnexContentIdentifierLockOld r = gitAnnexContentIdentifierDbDirOld r <> ".lck"
+gitAnnexContentIdentifierLockOld :: Git.Repo -> OsPath
+gitAnnexContentIdentifierLockOld r =
+ gitAnnexContentIdentifierDbDirOld r <> literalOsPath ".lck"
-removeOldDb :: FilePath -> Annex ()
+removeOldDb :: OsPath -> Annex ()
removeOldDb db =
whenM (liftIO $ doesDirectoryExist db) $ do
v <- liftIO $ tryNonAsync $
removePathForcibly db
case v of
- Left ex -> giveup $ "Failed removing old database directory " ++ db ++ " during upgrade (" ++ show ex ++ ") -- delete that and re-run git-annex to finish the upgrade."
+ Left ex -> giveup $ "Failed removing old database directory " ++ fromOsPath db ++ " during upgrade (" ++ show ex ++ ") -- delete that and re-run git-annex to finish the upgrade."
Right () -> return ()
-- Populate the new keys database with associated files and inode caches.
(l, cleanup) <- inRepo $ LsFiles.inodeCaches [top]
forM_ l $ \case
(_f, Nothing) -> giveup "Unable to parse git ls-files --debug output while upgrading git-annex sqlite databases."
- (f, Just ic) -> unlessM (liftIO $ catchBoolIO $ isSymbolicLink <$> R.getSymbolicLinkStatus (toRawFilePath f)) $ do
- catKeyFile (toRawFilePath f) >>= \case
+ (f, Just ic) -> unlessM (liftIO $ catchBoolIO $ isSymbolicLink <$> R.getSymbolicLinkStatus (fromOsPath f)) $ do
+ catKeyFile f >>= \case
Nothing -> noop
Just k -> do
- topf <- inRepo $ toTopFilePath $ toRawFilePath f
+ topf <- inRepo $ toTopFilePath f
Database.Keys.runWriter AssociatedTable $ \h -> liftIO $
Database.Keys.SQL.addAssociatedFile k topf h
Database.Keys.runWriter ContentTable $ \h -> liftIO $
updateSmudgeFilter = do
lf <- Annex.fromRepo Git.attributesLocal
ls <- liftIO $ map decodeBS . fileLines'
- <$> catchDefaultIO "" (F.readFile' (toOsPath lf))
+ <$> catchDefaultIO "" (F.readFile' lf)
let ls' = removedotfilter ls
when (ls /= ls') $
- liftIO $ writeFile (fromRawFilePath lf) (unlines ls')
+ liftIO $ writeFile (fromOsPath lf) (unlines ls')
where
removedotfilter ("* filter=annex":".* !filter":rest) =
"* filter=annex" : removedotfilter rest
- to avoid exposing the secret token when launching the web browser. -}
writeHtmlShim :: String -> String -> FilePath -> IO ()
writeHtmlShim title url file =
- viaTmp (writeFileProtected . fromOsPath)
+	viaTmp writeFileProtected
(toOsPath $ toRawFilePath file)
(genHtmlShim title url)